import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split

# Define column names for the dataset
columns = ["duration","protocoltype","service","flag","srcbytes","dstbytes","land", "wrongfragment","urgent","hot","numfailedlogins","loggedin", "numcompromised","rootshell","suattempted","numroot","numfilecreations", "numshells","numaccessfiles","numoutboundcmds","ishostlogin",
"isguestlogin","count","srvcount","serrorrate", "srvserrorrate",
"rerrorrate","srvrerrorrate","samesrvrate", "diffsrvrate", "srvdiffhostrate","dsthostcount","dsthostsrvcount","dsthostsamesrvrate", "dsthostdiffsrvrate","dsthostsamesrcportrate",
"dsthostsrvdiffhostrate","dsthostserrorrate","dsthostsrvserrorrate",
"dsthostrerrorrate","dsthostsrvrerrorrate","attack", "lastflag"]

"""
#Downlod link for dataset directly
https://www.kaggle.com/datasets/anushonkar/network-anamoly-detection
"""

# Load the training and test splits, then explore the training data
data = pd.read_csv("Train.txt", sep=",", names=columns)
data_test = pd.read_csv("Test.txt", sep=",", names=columns)
data.shape
data.head()
data.describe()
data.info()
# Data preprocessing: drop irrelevant features
data.drop(['land', 'urgent', 'numfailedlogins', 'numoutboundcmds'], axis=1, inplace=True)

# Apply the same preprocessing to data_test; it is encoded later,
# alongside the training data (see the sketch after the encoding step)
data_test.drop(['land', 'urgent', 'numfailedlogins', 'numoutboundcmds'], axis=1, inplace=True)
data_test.fillna(0, inplace=True)  # fill any missing values with 0
# Re-check the training frame for missing values
data.shape
data.isna().sum()

# Handling missing values in the training dataset
clean_data = data.dropna(axis=0)
clean_data.select_dtypes(exclude=[np.number])  # inspect the remaining object-type columns
clean_data.shape

# Collapse the specific attack names into a binary normal/attack label
data['attack'].unique()
data['attack'] = np.where(data['attack'] != "normal", "attack", "normal")
data['attack'].unique()
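
# Sketch: check the class balance after binarizing. The NSL-KDD training
# split is roughly balanced between 'normal' and 'attack', which is why
# plain accuracy is a reasonable headline metric below:
print(data['attack'].value_counts())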

# Remove object dtypes: encode the categorical columns as integers,
# keeping one fitted encoder per column so the mappings can be reused
encoders = {}
for col in ['protocoltype', 'service', 'flag', 'attack']:
    encoders[col] = LabelEncoder()
    data[col] = encoders[col].fit_transform(data[col])
data.info()
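
# data_test has been cleaned above but not yet encoded, so it cannot be
# scored by the model as-is. A minimal sketch of mirroring the training
# encoding on it; rows whose categories never appear in Train.txt are
# dropped first, since LabelEncoder.transform raises on unseen labels:
data_test['attack'] = np.where(data_test['attack'] != "normal", "attack", "normal")
for col in ['protocoltype', 'service', 'flag', 'attack']:
    known = set(encoders[col].classes_)
    data_test = data_test[data_test[col].isin(known)].copy()
    data_test[col] = encoders[col].transform(data_test[col])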

# Feature correlation visualization
plt.figure(figsize=(20, 15))
sns.heatmap(data.corr())
plt.show()
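
# Sketch: surface strongly correlated feature pairs as pruning candidates
# (the 0.9 threshold is an arbitrary illustration, not part of the
# original pipeline):
corr = data.corr().abs()
upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
high_pairs = [(r, c, round(upper.loc[r, c], 2))
              for r in upper.index for c in upper.columns
              if upper.loc[r, c] > 0.9]
print(high_pairs)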

# Split into features and target
clean_data = data.dropna(axis=0)
X = clean_data.drop("attack", axis=1)
y = clean_data['attack']
X = X.fillna(0)  # no-op after dropna, kept as a safety net

# Feature Scaling
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)

# Splitting the dataset
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
X_train.shape
X_test.shape
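
# Note: the split above is a plain random shuffle; passing stratify=y would
# preserve the normal/attack ratio in both splits. A sketch:
# X_train, X_test, y_train, y_test = train_test_split(
#     X_scaled, y, test_size=0.2, random_state=42, stratify=y)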

# Random Forest Model Training with Hyperparameter Tuning
param_dist = {
    'n_estimators': [100, 200, 300, 400, 500],
    'max_features': ['sqrt'],
    'max_depth': [10, 20, 30, 40, 50],
    'min_samples_split': [2, 5, 10],
    'min_samples_leaf': [1, 2, 4],
    'bootstrap': [True]
}

# Base RandomForest estimator for the search (fixed seed for reproducibility)
rf = RandomForestClassifier(random_state=42)

# Hyperparameter tuning using RandomizedSearchCV
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=param_dist, verbose=1, n_iter=10, cv=5, random_state=42, n_jobs=-1)
rf_random.fit(X_train, y_train)  # verbosity is set on the search object, not on fit

# Displaying the best parameters
print("Best Parameters:", rf_random.best_params_)

# Model Evaluation with the best estimator
best_rf = rf_random.best_estimator_
rf_best_pred = best_rf.predict(X_test)
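
# Sketch: inspect which features drive the tuned model; feature_importances_
# is a standard attribute of a fitted RandomForestClassifier, and the column
# names come from X defined above:
importances = pd.Series(best_rf.feature_importances_, index=X.columns)
print(importances.sort_values(ascending=False).head(10))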

# Evaluation Metrics
print("Improved Accuracy:", accuracy_score(y_test, rf_best_pred))
# ROC-AUC is computed on predicted probabilities rather than hard labels
rf_best_proba = best_rf.predict_proba(X_test)[:, 1]
print("Improved ROC-AUC Score:", roc_auc_score(y_test, rf_best_proba))
print("Improved Classification Report:\n", classification_report(y_test, rf_best_pred))
print("Improved Confusion Matrix:\n", confusion_matrix(y_test, rf_best_pred))